Another potentially interesting question we can try to answer is how much face representation we see across the task. In order to do so, we’ve trained a linear SVM classifier within subjects on the data from the UNSMOOTHED FFA localizer to classify signal into faces, objects and scrambles. We can then apply that classifier to various facets of our data. For each of these analyses, we will look at the probability of the classifier predicting a face. If the classifier does indeed predict a face, we score that TR with a “1”, otherwise, it gets a “0”, meaning chance becomes 1/3 = .33.

First, we will apply it to each TR of individual trials. Trials are split into 4 bins based on accuracy and load, and averaged over those measures to create a single time course for each category. The classifier was also applied to each TR of a “template” for each condition. In this analysis, all trials in a given condition were averaged to create a prototypical example for the category. The classifier was then applied to those averages.

We can then look at the probability of classification across subjects. First, we look at it across all subjects, but then we can look at it across our working memory capacity groups.

Finally, we will relate these neural measures to behavior, both averaged over time and for each TR.

library(reshape2)
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2     ✓ purrr   0.3.4
## ✓ tibble  3.0.3     ✓ dplyr   1.0.1
## ✓ tidyr   1.1.1     ✓ stringr 1.4.0
## ✓ readr   1.3.1     ✓ forcats 0.5.0
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
library(patchwork)

# Behavioral data, WM-capacity group assignments, and DFR split-group info.
# NOTE(review): objects used later (constructs_fMRI, WM_groups, p200_data)
# are assumed to come from these .RData files -- confirm.
load('data/behav.RData')
load('data/split_groups_info.RData')
load('data/DFR_split_groups_info.RData')

# Helper functions used throughout this analysis.
source("helper_fxns/split_into_groups.R")
source('helper_fxns/prep_trial_levels_for_plot.R')
source("helper_fxns/split_trial_type.R")

# Standard error of the mean, ignoring missing values: SD of the non-NA
# entries divided by the square root of their count.
se <- function(x) {
  n_obs <- sum(!is.na(x))
  sd(x, na.rm = TRUE) / sqrt(n_obs)
}
# Classifier information: per-subject cross-validation accuracy and the
# best SVM cost (C) selected during tuning.
clf_acc <- read.csv('data/MVPA/fusiform_unsmoothed/clf_acc.csv')
best_c <- read.csv('data/MVPA/fusiform_unsmoothed/best_C.csv')

# Classifier face probabilities from condition "templates" (all trials in
# a condition averaged into one prototypical time course). Headerless CSVs,
# so columns are V1..Vn; the [,1:14] use below assumes >= 14 TR columns,
# and the $PTID assignment later implies one row per subject -- confirm.
averages_from_template <- list(high_correct = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_high_correct_avg.csv',header=FALSE), 
                               high_incorrect = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_high_incorrect_avg.csv',header=FALSE), 
                               low_correct = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_low_correct_avg.csv',header=FALSE), 
                               low_incorrect = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_low_incorrect_avg.csv',header=FALSE))

# Correct-minus-incorrect difference time courses, one per load level.
averages_from_template[["high_load_correct_diff"]] <- averages_from_template[["high_correct"]][,1:14] - averages_from_template[["high_incorrect"]][,1:14]
averages_from_template[["low_load_correct_diff"]] <- averages_from_template[["low_correct"]][,1:14] - averages_from_template[["low_incorrect"]][,1:14]

# Classifier face PROBABILITIES from individual trials, averaged within
# each load x accuracy bin (same headerless layout as the templates above).
individual_trial_averages_probs <- list(
  high_correct = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_high_correct_indiv_avg_probs.csv',header=FALSE),
  high_incorrect = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_high_incorrect_indiv_avg_probs.csv',header=FALSE),
  low_correct = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_low_correct_indiv_avg_probs.csv',header=FALSE),
  low_incorrect = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_low_incorrect_indiv_avg_probs.csv',header=FALSE)) 

# Correct-minus-incorrect probability differences, one per load level.
individual_trial_averages_probs[["high_load_correct_diff"]] <- individual_trial_averages_probs[["high_correct"]][,1:14] - individual_trial_averages_probs[["high_incorrect"]][,1:14]
individual_trial_averages_probs[["low_load_correct_diff"]] <- individual_trial_averages_probs[["low_correct"]][,1:14] - individual_trial_averages_probs[["low_incorrect"]][,1:14]


# Classifier hard PREDICTIONS from individual trials (per the intro prose:
# face predicted = 1, otherwise 0, averaged within bin), same layout as
# the probabilities above.
individual_trial_averages_preds <- list(
  high_correct = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_high_correct_indiv_avg_preds.csv',header=FALSE),
  high_incorrect = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_high_incorrect_indiv_avg_preds.csv',header=FALSE),
  low_correct = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_low_correct_indiv_avg_preds.csv',header=FALSE),
  low_incorrect = read.csv('data/MVPA/fusiform_unsmoothed/all_suj_low_incorrect_indiv_avg_preds.csv',header=FALSE)) 

# Correct-minus-incorrect prediction differences, one per load level.
individual_trial_averages_preds[["high_load_correct_diff"]] <- individual_trial_averages_preds[["high_correct"]][,1:14] - individual_trial_averages_preds[["high_incorrect"]][,1:14]

individual_trial_averages_preds[["low_load_correct_diff"]] <- individual_trial_averages_preds[["low_correct"]][,1:14] - individual_trial_averages_preds[["low_incorrect"]][,1:14]


# Clean up each of the 6 condition tables (4 conditions + 2 difference
# scores) in all three lists: convert NaN cells in the 14 TR columns to
# plain NA, then attach the subject ID column so tables can be merged
# with behavior later. constructs_fMRI is assumed to come from the
# .RData loads above, with rows aligned to these tables -- confirm.

for (cond in seq_len(6)) {
  for (tr in seq_len(14)) {
    averages_from_template[[cond]][is.nan(averages_from_template[[cond]][, tr]), tr] <- NA
    individual_trial_averages_probs[[cond]][is.nan(individual_trial_averages_probs[[cond]][, tr]), tr] <- NA
    individual_trial_averages_preds[[cond]][is.nan(individual_trial_averages_preds[[cond]][, tr]), tr] <- NA
  }
  averages_from_template[[cond]]$PTID <- constructs_fMRI$PTID
  individual_trial_averages_probs[[cond]]$PTID <- constructs_fMRI$PTID
  individual_trial_averages_preds[[cond]]$PTID <- constructs_fMRI$PTID
}

# Persist everything for downstream notebooks.
save(list = c("clf_acc", "best_c", "averages_from_template", "individual_trial_averages_probs","individual_trial_averages_preds"),
     file = "data/MVPA_fusiform_unsmoothed.RData")

Probability of classifying faces

On average, we were able to classify faces with 72.7% accuracy (statistically significantly different from chance = 0.33; see the t-test below). The classifier was trained on data from an independent FFA localizer. Data were masked using a bilateral fusiform mask from the AAL atlas; from that, the top 100 voxels based on the faces vs objects contrast in the overall subject GLM were selected as features for the classifier. The data used to train the classifier were shifted by 4.5 seconds to account for the hemodynamic delay.

A linear SVM classifier was used; the localizer task was split into 6 blocks of stimuli. These blocks were used in a pre-defined split for cross validation, where one block of each stimulus type was held out as a test set. Data were normalized within the training and test sets separately. Within this training set, another cross validation process was repeated to tune the cost of the model over the values [0.01, 0.1, 1, 10]. The best value of the cost function was used for each cross validation to score the classifier on the test set. The best classifier was also used to predict face presence at each TR during the DFR task.

# Per-subject mean CV accuracy across folds, tested against chance
# (mu = 0.33, i.e. 1/3 for the three stimulus classes).
clf_acc$average <- rowMeans(clf_acc)
t.test(clf_acc$average,mu=0.33)
## 
##  One Sample t-test
## 
## data:  clf_acc$average
## t = 44.178, df = 168, p-value < 2.2e-16
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
##  0.7093532 0.7448436
## sample estimates:
## mean of x 
## 0.7270984
# Melt each list of condition tables into long format for ggplot. The
# aes() calls below imply the helper emits columns level, TR, value,
# se_min, se_max -- see helper_fxns/prep_trial_levels_for_plot.R.
template_preds_melt <- prep_trial_levels_for_plot(averages_from_template)
## Using level as id variables
individual_trial_probs_melt <- prep_trial_levels_for_plot(individual_trial_averages_probs)
## Using level as id variables
individual_trial_preds_melt <- prep_trial_levels_for_plot(individual_trial_averages_preds)
## Using level as id variables

All subjects

It’s good to see here that the probability of classifying a face in this task more or less tracks with our task structure - we see increased probability of classification when there are faces on the screen (ie during encoding and probe). Interestingly, the probability of classifying a face during the delay period is less than chance accuracy.

From individual trials

There is significantly higher likelihood of decoding a face during encoding in high load trials than low load trials.

# Face-probability time courses for the four load x accuracy conditions
# (individual-trial averages), with SE ribbons.
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
  geom_line(aes(color=level))+
  geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+ # chance = 1/3 reference line
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Probability of classifier predicting a face")+
  theme_classic()

# Encoding-period probability (column V6, i.e. TR 6), averaged over
# correct and incorrect trials within each load level.
encoding_level_avg <- data.frame(high = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6), na.rm=TRUE), low = rowMeans(cbind(individual_trial_averages_probs[["low_correct"]]$V6, individual_trial_averages_probs[["low_incorrect"]]$V6),na.rm=TRUE))

# Paired within subjects: high vs low load at encoding.
t.test(encoding_level_avg$high,encoding_level_avg$low,paired=TRUE)
## 
##  Paired t-test
## 
## data:  encoding_level_avg$high and encoding_level_avg$low
## t = 5.372, df = 169, p-value = 2.548e-07
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.04782836 0.10340299
## sample estimates:
## mean of the differences 
##              0.07561568

Here, we’re looking at correct - incorrect probability - typically, we see a higher probability in the correct trials, but it seems like in the high load trials, it’s more likely to predict a face from the incorrect trials than the correct ones during the delay period. Must take this with a grain of salt, however, since those TRs don’t have above chance likelihood of predicting a face.

# Correct - incorrect probability differences from individual trials;
# the dotted line at zero marks "no difference".
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
  geom_line(aes(x=TR,y=0), linetype="dotted")+
  geom_line(aes(color=level))+
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
  theme_classic()

From templates

In the template, however, there is no difference between load at encoding. Instead, we see that the low correct have significantly more likelihood of having a face classified at probe than either of the high load templates. We also see that for the correct trials, we’re more likely to classify from the correct vs incorrect trials.

# Face-probability time courses by condition from the TEMPLATES.
# NOTE(review): the object is named template_preds_melt but holds the
# melted averages_from_template data -- naming is historical; confirm.
ggplot(data=template_preds_melt%>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
  geom_line(aes(color=level))+
  geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+ # chance = 1/3 reference line
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Probability of classifier predicting a face")+
  theme_classic()

# Probe-period (column V11) template probabilities for three conditions.
# low_incorrect is omitted here -- presumably because of its extensive
# missingness (cf. "57 observations deleted" later); confirm.
probe_data_template <- data.frame(high_correct=averages_from_template[["high_correct"]]$V11, high_incorrect = averages_from_template[["high_incorrect"]]$V11, low_correct = averages_from_template[["low_correct"]]$V11)
probe_data_template <- melt(probe_data_template)
## No id variables; using all as measure variables
# One-way ANOVA across the three conditions, followed by pairwise Tukey.
probe.aov <- aov(value ~ variable, data = probe_data_template)
summary(probe.aov)
##              Df Sum Sq Mean Sq F value  Pr(>F)    
## variable      2   4.10  2.0516   12.71 4.1e-06 ***
## Residuals   507  81.82  0.1614                    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
TukeyHSD(probe.aov)
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = value ~ variable, data = probe_data_template)
## 
## $variable
##                                   diff          lwr         upr     p adj
## high_incorrect-high_correct -0.1156863 -0.218111561 -0.01326099 0.0222508
## low_correct-high_correct     0.1039216  0.001496282  0.20634686 0.0458458
## low_correct-high_incorrect   0.2196078  0.117182557  0.32203313 0.0000019

Weirdly, in the template, it is during the low load trials where we really can see prediction from the faces. This is probably because in the templates, we don’t see any classification from the low load incorrect trials. What’s maybe more notable is that there’s basically no difference between the correct and incorrect trials at high load.

# Correct - incorrect probability differences from the templates; the
# dotted line at zero marks "no difference".
ggplot(data=template_preds_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
  geom_line(aes(color=level))+
  geom_line(aes(x=TR,y=0), linetype="dotted")+
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
  theme_classic()

I’m not 100% sure this is statistically valid, but the average prediction is higher for the predictions from template vs predictions from the individual trials.

# Encoding-TR (V6) probability averaged over three conditions, once for
# templates and once for individual trials (low_incorrect excluded from
# both, keeping the two averages comparable).
compare_across_temp_indiv <- data.frame(template = rowMeans(cbind(averages_from_template[["high_correct"]]$V6,
                                                                  averages_from_template[["high_incorrect"]]$V6,
                                                                  averages_from_template[["low_correct"]]$V6)),
                                        indiv = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6,
                                                               individual_trial_averages_probs[["high_incorrect"]]$V6,
                                                               individual_trial_averages_probs[["low_correct"]]$V6)))

# Non-parametric paired comparison (the prose above flags a validity caveat).
wilcox.test(compare_across_temp_indiv$template, compare_across_temp_indiv$indiv,paired=TRUE)
## 
##  Wilcoxon signed rank test with continuity correction
## 
## data:  compare_across_temp_indiv$template and compare_across_temp_indiv$indiv
## V = 11294, p-value = 3.743e-10
## alternative hypothesis: true location shift is not equal to 0

By Working Memory Capacity Groups

# Split each condition table into working-memory-capacity groups using
# WM_groups (Tukey output below implies levels low/med/high) -- see
# helper_fxns/split_trial_type.R for the returned structure.
split_template <- split_trial_type(averages_from_template,WM_groups)
split_indiv_probs <- split_trial_type(individual_trial_averages_probs, WM_groups)

From Indiv Trials

The only difference between groups we see is a trend towards a difference between low and medium capacity groups during probe for the incorrect high load trial, where we see higher probability of classifying faces in low capacity subjects.

# One panel per trial type (the first four entries of the group-averaged
# data): capacity-group mean probability time courses with SE ribbons
# and a dotted chance line at 1/3.
indiv_avgs <- vector("list", 4)

for (panel in seq_len(4)) {
  panel_df <- split_indiv_probs[["avgs"]][[panel]][["all"]]
  panel_title <- names(split_indiv_probs[["avgs"]])[panel]
  indiv_avgs[[panel]] <- ggplot(data = panel_df) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(panel_title) +
    ylab("Probability") +
    theme_classic()
}

# Assemble the four panels 2x2 with a shared legend.
(indiv_avgs[[1]] + indiv_avgs[[2]]) / (indiv_avgs[[3]] + indiv_avgs[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title="Probability of classifier predicting a face from individual trials")

# encoding 
# One-way ANOVA of encoding-TR probability (column 6) on capacity group
# (column 16 -- assumed to be the group factor added by split_trial_type;
# confirm), run separately for each of the 4 trial types, with Tukey
# post-hocs. Kept verbatim: the printed output below echoes these exact
# expressions.
for (trial_type in seq.int(1,4)){ 
  print(names(split_indiv_probs[["all_data"]])[trial_type])
  temp.aov <- aov(split_indiv_probs[["all_data"]][[trial_type]][["all"]][,6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][,16])
  print(summary(temp.aov))
  print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.141 0.07063
## Residuals                                                    165  5.256 0.03186
##                                                              F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2.217  0.112
## Residuals                                                                  
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff        lwr        upr     p adj
## med-high  0.02130722 -0.0584681 0.10108255 0.8029081
## low-high -0.04802469 -0.1278000 0.03175064 0.3310454
## low-med  -0.06933191 -0.1491072 0.01044341 0.1024725
## 
## [1] "high_incorrect"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.004 0.00186
## Residuals                                                    165  7.662 0.04644
##                                                              F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]    0.04  0.961
## Residuals                                                                  
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr        upr     p adj
## med-high 0.003492581 -0.09282154 0.09980671 0.9959532
## low-high 0.011268075 -0.08504605 0.10758220 0.9586856
## low-med  0.007775493 -0.08853863 0.10408962 0.9801058
## 
## [1] "low_correct"
##                                                               Df Sum Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2 0.0115
## Residuals                                                    165 2.0615
##                                                               Mean Sq F value
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.005726   0.458
## Residuals                                                    0.012494        
##                                                              Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]  0.633
## Residuals                                                          
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                  diff         lwr        upr     p adj
## med-high  0.009956874 -0.04000245 0.05991619 0.8848448
## low-high -0.010265305 -0.06022463 0.03969401 0.8780691
## low-med  -0.020222180 -0.07018150 0.02973714 0.6048626
## 
## [1] "low_incorrect"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.005 0.00253
## Residuals                                                    108  9.416 0.08719
##                                                              F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   0.029  0.971
## Residuals                                                                  
## 57 observations deleted due to missingness
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                  diff        lwr       upr     p adj
## med-high -0.016602579 -0.1896699 0.1564647 0.9717627
## low-high -0.004335922 -0.1681223 0.1594504 0.9978203
## low-med   0.012266658 -0.1458810 0.1704143 0.9814463
# probe
# Same group ANOVA as the encoding analysis above, but on the probe-TR
# probability (column 11).
for (trial_type in seq.int(1,4)){ 
  print(names(split_indiv_probs[["all_data"]])[trial_type])
  temp.aov <- aov(split_indiv_probs[["all_data"]][[trial_type]][["all"]][,11] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][,16])
  print(summary(temp.aov))
  print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2 0.0267 0.01334
## Residuals                                                    165 2.2350 0.01355
##                                                              F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   0.985  0.376
## Residuals                                                                  
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 11] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                diff         lwr        upr     p adj
## med-high 0.01932699 -0.03269198 0.07134597 0.6544818
## low-high 0.03050875 -0.02151022 0.08252772 0.3499898
## low-med  0.01118176 -0.04083722 0.06320073 0.8673690
## 
## [1] "high_incorrect"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.137 0.06844
## Residuals                                                    165  3.901 0.02364
##                                                              F value Pr(>F)  
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2.895 0.0581 .
## Residuals                                                                    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 11] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr        upr     p adj
## med-high -0.01221885 -0.08094392 0.05650623 0.9072043
## low-high  0.05350998 -0.01521510 0.12223505 0.1594136
## low-med   0.06572882 -0.00299625 0.13445390 0.0641419
## 
## [1] "low_correct"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2 0.0408 0.02041
## Residuals                                                    165 1.7542 0.01063
##                                                              F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]    1.92   0.15
## Residuals                                                                  
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 11] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr        upr     p adj
## med-high 0.031980835 -0.01410356 0.07806523 0.2313211
## low-high 0.034049911 -0.01203448 0.08013431 0.1908212
## low-med  0.002069077 -0.04401532 0.04815347 0.9938033
## 
## [1] "low_incorrect"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.031 0.01528
## Residuals                                                    108  9.000 0.08333
##                                                              F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   0.183  0.833
## Residuals                                                                  
## 57 observations deleted due to missingness
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 11] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                  diff        lwr       upr     p adj
## med-high -0.037155525 -0.2063556 0.1320446 0.8608359
## low-high -0.002564146 -0.1626906 0.1575623 0.9992019
## low-med   0.034591379 -0.1200224 0.1892052 0.8559589

From templates

There are no significant differences during encoding, but we do see a trend towards greater likelihood of face classification at TR 12 in low capacity subjects than high capacity subjects.

# One panel per trial type from the template-based capacity-group
# averages: group-mean probability time courses with SE ribbons and a
# dotted chance line at 1/3.
template_avgs <- vector("list", 4)

for (panel in seq_len(4)) {
  panel_df <- split_template[["avgs"]][[panel]][["all"]]
  panel_title <- names(split_template[["avgs"]])[panel]
  template_avgs[[panel]] <- ggplot(data = panel_df) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(panel_title) +
    ylab("Probability") +
    theme_classic()
}

# Assemble the four panels 2x2 with a shared legend.
(template_avgs[[1]] + template_avgs[[2]]) / (template_avgs[[3]] + template_avgs[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title="Probability of classifier predicting a face from trial templates")

# Group ANOVA on the template probabilities at column 12 (TR 12, per the
# prose above), for each trial type; Tukey post-hocs follow. Kept
# verbatim: the printed output below echoes these exact expressions.
for (trial_type in seq.int(1,4)){ 
  print(names(split_template[["all_data"]])[trial_type])
  temp.aov <- aov(split_template[["all_data"]][[trial_type]][["all"]][,12] ~ split_template[["all_data"]][[trial_type]][["all"]][,16])
  print(summary(temp.aov))
  print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2  0.893  0.4466
## Residuals                                                 165 25.106  0.1522
##                                                           F value Pr(>F)  
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2.935 0.0559 .
## Residuals                                                                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 12] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##                diff          lwr       upr     p adj
## med-high 0.11607143 -0.058273984 0.2904168 0.2595349
## low-high 0.17559524  0.001249826 0.3499407 0.0479479
## low-med  0.05952381 -0.114821603 0.2338692 0.6989408
## 
## [1] "high_incorrect"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2  0.656  0.3280
## Residuals                                                 165 28.122  0.1704
##                                                           F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   1.925  0.149
## Residuals                                                               
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 12] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr       upr     p adj
## med-high -0.02380952 -0.20832802 0.1607090 0.9499738
## low-high  0.11904762 -0.06547088 0.3035661 0.2814571
## low-med   0.14285714 -0.04166136 0.3273756 0.1626843
## 
## [1] "low_correct"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2  0.155 0.07755
## Residuals                                                 165 27.289 0.16539
##                                                           F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   0.469  0.627
## Residuals                                                               
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 12] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##                  diff        lwr       upr     p adj
## med-high -0.068452381 -0.2502197 0.1133149 0.6469553
## low-high -0.008928571 -0.1906959 0.1728387 0.9925873
## low-med   0.059523810 -0.1222435 0.2412911 0.7191910
## 
## [1] "low_incorrect"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2      0       0
## Residuals                                                 108      0       0
##                                                           F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16]               
## Residuals                                                               
## 57 observations deleted due to missingness
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 12] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##          diff lwr upr p adj
## med-high    0   0   0   NaN
## low-high    0   0   0   NaN
## low-med     0   0   0   NaN

Correlation with Behavior

Individual Trials

Averaged over time

One significant relationship between classification probability averaged over time and behavior is with DFR high load accuracy and classification probability at high load incorrect trials - higher classification probability is associated with lower accuracy. We also see a significant relationship between high load accuracy and the difference in classification probability - the stronger the correct - incorrect difference, the higher the accuracy.

We also see trends (p ~ 0.06) for the relationships of omnibus span and classification probability in the difference between correct and incorrect, and for the accuracy at high load and classification in the low load correct trials.

# Per-subject probability averaged over the full 14-TR trial for each
# condition and each correct-incorrect difference. Only low_incorrect
# uses na.rm=TRUE -- presumably because of its missing subjects; confirm.
indiv_avg_over_time <- data.frame(high_correct = rowMeans(individual_trial_averages_probs[["high_correct"]][,1:14]),
                                  high_incorrect = rowMeans(individual_trial_averages_probs[["high_incorrect"]][,1:14]),
                                  low_correct = rowMeans(individual_trial_averages_probs[["low_correct"]][,1:14]),
                                  low_incorrect = rowMeans(individual_trial_averages_probs[["low_incorrect"]][,1:14],na.rm=TRUE), 
                                  high_load_diff_correct = rowMeans(individual_trial_averages_probs[["high_load_correct_diff"]][,1:14]),
                                  low_load_diff_correct = rowMeans(individual_trial_averages_probs[["low_load_correct_diff"]][,1:14]))

# Not a no-op: rowMeans(..., na.rm=TRUE) yields NaN for all-NA rows, and
# is.na() is TRUE for NaN, so this normalizes NaN cells to plain NA.
indiv_avg_over_time[is.na(indiv_avg_over_time)] <- NA 
# Attach the span composite and subject ID, then merge in symptom (BPRS)
# and DFR accuracy measures by subject.
indiv_avg_over_time <- data.frame(indiv_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI, PTID = constructs_fMRI$PTID)
indiv_avg_over_time <- merge(indiv_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")


# Scatter + linear fit of each of the 6 probability measures against
# three behavioral outcomes. After merge() puts PTID first, columns are:
# 1 = PTID, 2:7 = the six probability measures, 8 = omnibus_span,
# 9 = BPRS_TOT, 10:11 = DFR accuracy -- hence i+1 and 8:11 below.
plot_list <- list()

for (i in seq.int(1,6)){
  plot_data <- indiv_avg_over_time[,c(i+1,8:11)]
  colnames(plot_data)[1] <- "prob"  # generic name so one aes() works for every measure
  plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=omnibus_span))+
    geom_point()+
    stat_smooth(method="lm")+
    xlab("Probability")+
    ggtitle(colnames(indiv_avg_over_time)[i+1])+
    theme_classic()
  
  plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=XDFR_MRI_ACC_L3))+
    geom_point()+
    stat_smooth(method="lm")+
    xlab("Probability")+
    ggtitle(colnames(indiv_avg_over_time)[i+1])+
    theme_classic()
  
  plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=BPRS_TOT))+
    geom_point()+
    stat_smooth(method="lm")+
    xlab("Probability")+
    ggtitle(colnames(indiv_avg_over_time)[i+1])+
    theme_classic()
  
}

# Display: 2x2 panels of time-averaged face probability vs. each behavioral
# measure (trial types 1-4), followed by the two correct - incorrect
# difference panels (types 5-6).  The "Removed 57 rows" warnings reflect
# subjects with missing behavioral or probability values.
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
  (plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout against high-load DFR accuracy.
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
  (plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout against BPRS total (symptom severity).
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
  (plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Follow-up Pearson tests for the relationships highlighted in the plots above.

# Span vs. high-load correct probability: n.s. (p = .22).
cor.test(indiv_avg_over_time$high_correct, indiv_avg_over_time$omnibus_span)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$high_correct and indiv_avg_over_time$omnibus_span
## t = 1.2442, df = 168, p-value = 0.2152
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.05576428  0.24257749
## sample estimates:
##        cor 
## 0.09555197
# Span vs. high-load correct - incorrect difference: trend (p ~ .06).
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$omnibus_span)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$omnibus_span
## t = 1.8786, df = 168, p-value = 0.06204
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.007235302  0.287737432
## sample estimates:
##       cor 
## 0.1434352
# High-load accuracy vs. low-load correct probability: trend (p ~ .07).
cor.test(indiv_avg_over_time$low_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$low_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -1.8276, df = 168, p-value = 0.06939
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.28416073  0.01113018
## sample estimates:
##       cor 
## -0.139618
# High-load accuracy vs. high-load incorrect probability: significant negative.
cor.test(indiv_avg_over_time$high_incorrect, indiv_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$high_incorrect and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -3.3535, df = 168, p-value = 0.0009862
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.3864259 -0.1038824
## sample estimates:
##        cor 
## -0.2504802
# High-load accuracy vs. high-load correct - incorrect difference: significant positive.
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 3.2034, df = 168, p-value = 0.001625
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.09276219 0.37683252
## sample estimates:
##       cor 
## 0.2399266

All subjects

Across time

If we look at the patterns over time, we can see that BPRS tends to be positively related to classification only in the probe period during the high load trials, but starts to peak earlier in the low load trials. Correlations with accuracy are strongest during the encoding period. We also see a slight negative correlation with omnibus span during the probe period, particularly in the correct high load trials.

Next step is to pull out some of these correlations and see if they’re significant.

# Combine WM-capacity constructs, task/behavioral data, and clinical z-scores
# into one frame keyed on subject ID.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")

# Keep PTID plus five behavioral/clinical columns.  NOTE(review): selecting
# by hard-coded positions c(1,7,14,15,41,42) is fragile -- it silently breaks
# if any input gains or loses a column; selecting by name would be safer.
# Columns 2-5 of the result are the measures correlated per-TR below.
data_to_plot <- data_to_plot[,c(1,7,14,15,41,42)]
# Load effect on accuracy: high-load (L3) minus low-load (L1) accuracy.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1

# For each trial type, correlate every behavioral measure (columns 2-5 of
# data_to_plot) with the face-classification probability at each of the 14
# TRs, then draw one correlation-by-time line per measure.
corr_to_behav_plots <- list()

for (type_idx in seq.int(1, 6)) {
  trial_name <- names(individual_trial_averages_probs)[type_idx]
  probs <- individual_trial_averages_probs[[type_idx]]

  # Rows = the 4 behavioral measures, columns = the 14 TRs.
  corr_mat <- data.frame(matrix(nrow = 4, ncol = 14))
  for (m in seq.int(2, 5)) {
    for (tr in seq.int(1, 14)) {
      corr_mat[m - 1, tr] <- cor(data_to_plot[, m], probs[, tr],
                                 use = "pairwise.complete.obs")
    }
  }

  # Transpose so rows index TR, then label and melt for plotting.
  corr_mat <- data.frame(t(corr_mat))
  corr_mat$TR <- seq.int(1, 14)
  colnames(corr_mat)[1:4] <- colnames(data_to_plot)[2:5]

  long_corrs <- melt(corr_mat, id.vars = "TR")

  corr_to_behav_plots[[trial_name]] <-
    ggplot(data = long_corrs, aes(x = TR, y = value)) +
    geom_line(aes(color = variable)) +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(trial_name) +
    theme_classic()
}
# Display the per-TR correlation time courses: the four main conditions,
# then the two correct - incorrect difference conditions.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
  plot_layout(guides="collect")+
  plot_annotation(title = "Correlation between face classification and behavioral measures")

(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
  plot_layout(guides="collect")+
  plot_annotation(title = "Correlation between difference across correctness in face classification and behavioral measures")

# Scatterplots of classification probability at three task periods (cue = TR 6,
# delay = TR 8, probe = TR 11; columns V6/V8/V11 of the probability frames)
# against each behavioral measure, for all six trial types.  The nine
# near-identical ggplot constructions in the original are collapsed into one
# helper plus a loop over measure x period; the resulting plot_list structure
# (plot_list[[measure]][[period]][[trial_type]]) is unchanged.
period_cols <- c(cue = "V6", delay = "V8", probe = "V11")
behav_cols <- c(omnibus = "omnibus_span_no_DFR_MRI",
                L3_Acc = "XDFR_MRI_ACC_L3",
                BPRS = "BPRS_TOT")

# One scatterplot of probability (y) against a behavioral measure (x),
# with a linear fit.  Uses the `.data` pronoun so columns can be chosen
# by string name.
prob_vs_behav_plot <- function(df, prob_col, behav_col, panel_title) {
  ggplot(data = df, aes(y = .data[[prob_col]], x = .data[[behav_col]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    ylab("Probability") +
    ggtitle(panel_title) +
    theme_classic()
}

plot_list <- list()

for (trial_type in seq.int(1, 6)) {
  # temp_plot_data is reused by the cor.test() calls further down in the
  # document, so it is deliberately left behind after the loop.
  temp_plot_data <- merge(p200_data, individual_trial_averages_probs[[trial_type]], by = "PTID")
  temp_plot_data <- merge(temp_plot_data, constructs_fMRI, by = "PTID")
  panel_title <- names(individual_trial_averages_probs)[trial_type]

  for (measure in names(behav_cols)) {
    for (period in names(period_cols)) {
      plot_list[[measure]][[period]][[trial_type]] <-
        prob_vs_behav_plot(temp_plot_data,
                           period_cols[[period]],
                           behav_cols[[measure]],
                           panel_title)
    }
  }
}

Encoding

We see significant correlations between high load accuracy and classification probability during high load correct trials and the correct-incorrect difference in high load trials.

# Cue (encoding) period: probability at TR 6 vs. each behavioral measure.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /  
  (plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]])  +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /  
  (plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]])  +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /  
  (plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]])  +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Follow-up tests at the cue TR.  NOTE(review): these pair a full-sample
# probability frame with temp_plot_data left over from the final loop
# iteration above -- verify that row order / subject IDs align between the
# two objects.
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.5311, df = 168, p-value = 0.1276
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.0337988  0.2631795
## sample estimates:
##       cor 
## 0.1173122
cor.test(individual_trial_averages_probs[["high_correct"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_correct"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.8011, df = 168, p-value = 0.005691
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.06271216 0.35060002
## sample estimates:
##       cor 
## 0.2112326
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.9385, df = 168, p-value = 0.003762
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.07301663 0.35964660
## sample estimates:
##      cor 
## 0.221101
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V6 and temp_plot_data$BPRS_TOT
## t = -1.7986, df = 168, p-value = 0.07388
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.28212606  0.01334185
## sample estimates:
##        cor 
## -0.1374484

Delay

There is a significant negative relationship between accuracy at high load and the probability of face classification at low load.

# Delay period: probability at TR 8 vs. each behavioral measure.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /  
  (plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]])  +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /  
  (plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]])  +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /  
  (plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]])  +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Follow-up tests at the delay TR.  The df = 111 tests involve low-load
# incorrect trials, which fewer subjects have.  NOTE(review): as with the
# cue-period tests, temp_plot_data is the final-iteration leftover from the
# plotting loop -- verify subject alignment.
cor.test(individual_trial_averages_probs[["high_correct"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_correct"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.2771, df = 168, p-value = 0.2033
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.05324686  0.24495237
## sample estimates:
##        cor 
## 0.09805323
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.1286, df = 168, p-value = 0.2607
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.06461195  0.23420229
## sample estimates:
##       cor 
## 0.0867459
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 0.98464, df = 111, p-value = 0.3269
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.09328127  0.27308760
## sample estimates:
##        cor 
## 0.09305201
cor.test(individual_trial_averages_probs[["low_correct"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_correct"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.3675, df = 168, p-value = 0.01905
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.32149962 -0.02997658
## sample estimates:
##        cor 
## -0.1796801
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -0.5013, df = 111, p-value = 0.6172
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.2302362  0.1384173
## sample estimates:
##         cor 
## -0.04752776
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V8 and temp_plot_data$BPRS_TOT
## t = -1.4859, df = 168, p-value = 0.1392
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.25994993  0.03726117
## sample estimates:
##        cor 
## -0.1138918

Probe

During the probe period, classification probability on incorrect high-load trials is significantly negatively correlated with high-load accuracy, while the correct - incorrect difference at high load is significantly positively correlated with it.

# Probe period: probability at TR 11 vs. each behavioral measure.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /  
  (plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]])  +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /  
  (plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]])  +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /  
  (plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]])  +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Follow-up tests at the probe TR.  NOTE(review): temp_plot_data is the
# final-iteration leftover from the plotting loop -- verify subject alignment.
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_incorrect"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.368, df = 168, p-value = 0.01902
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.32153443 -0.03001538
## sample estimates:
##        cor 
## -0.1797177
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.7013, df = 168, p-value = 0.007615
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.05520578 0.34397596
## sample estimates:
##       cor 
## 0.2040247
# Per-TR correlations between classification probability and behavior,
# computed separately within each WM-capacity group (3 levels) for each of
# the six trial types.  Result: behav_classification_corr_list[[measure]]
# [[trial_name]] is a 3 (level) x 14 (TR) frame plus a `level` factor column.
behav_classification_corr_list <- list()

for (trial_type in seq.int(1, 6)) {
  trial_name <- names(split_indiv_probs[["all_data"]])[trial_type]
  level_names <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]

  # Allocate one 3 x 14 (level x TR) frame; the original built three
  # identical frames by hand.
  new_corr_frame <- function() {
    out <- data.frame(matrix(nrow = 3, ncol = 14))
    rownames(out) <- level_names
    colnames(out) <- seq.int(1, 14)
    out
  }
  group_corrs_omnibus <- new_corr_frame()
  group_corrs_acc <- new_corr_frame()
  group_corrs_BPRS <- new_corr_frame()

  for (level in seq.int(1, 3)) {
    level_probs <- split_indiv_probs[["all_data"]][[trial_type]][[level]]
    # Sort the probability rows by subject ID; data_to_plot comes from
    # merge(), which sorts by PTID, so the filtered rows should align.
    # NOTE(review): worth verifying this alignment explicitly.
    temp_subj <- level_probs[order(level_probs$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% level_probs$PTID, ]

    for (TR in seq.int(1, 14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      # BPRS_TOT.x: merge() suffix -- presumably the p200_data copy of
      # BPRS_TOT; confirm against the merge that built data_to_plot.
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }

  # Label rows with their capacity-group level.  The original re-assigned
  # these inside the level loop (3x, idempotent); once per trial type
  # suffices, and each frame now uses its own rownames.
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_BPRS))
  group_corrs_omnibus$level <- factor(rownames(group_corrs_omnibus))

  behav_classification_corr_list[["omnibus"]][[trial_name]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[trial_name]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[trial_name]] <- group_corrs_acc
}

By Working Memory Capacity

behav_classification_corr_melt <- list()
behav_split_plot_list <- list()

# Melt each (level x TR) correlation frame to long format and draw one
# correlation-by-TR line per WM-capacity group, for every measure and
# trial type.
for (m_idx in seq.int(1, 3)) {
  measure_name <- names(behav_classification_corr_list)[m_idx]

  for (tt_idx in seq.int(1, 6)) {
    trial_name <- names(behav_classification_corr_list[[m_idx]])[tt_idx]

    long_corrs <- melt(behav_classification_corr_list[[m_idx]][[tt_idx]], id.vars = "level")
    # melt() leaves TR as a factor of the original column names; recover
    # numeric TR and order the capacity groups high -> low for the legend.
    long_corrs$variable <- as.numeric(as.character(long_corrs$variable))
    long_corrs$level <- factor(long_corrs$level, levels = c("high", "med", "low"))
    behav_classification_corr_melt[[measure_name]][[trial_name]] <- long_corrs

    behav_split_plot_list[[measure_name]][[trial_name]] <-
      ggplot(data = long_corrs, aes(x = variable, y = value)) +
      geom_line(aes(color = level)) +
      scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
      ggtitle(trial_name) +
      xlab("TR") +
      ylab("Correlation") +
      theme_classic()
  }
}
# Display the group-split correlation time courses: four main conditions,
# then the two correct - incorrect difference conditions, per measure.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) / 
  (behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) + 
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) / 
  (behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) / 
  (behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

Template

Averaged over time

If we average over time, there are no relationships between template classification and behavior.

# Template-based analogue of indiv_avg_over_time: per-subject classification
# probability from the condition templates, averaged over TRs 1-14.
# NOTE(review): only low_incorrect uses na.rm = TRUE -- presumably some
# subjects have no low-load incorrect trials; confirm this asymmetry is
# intentional.
template_avg_over_time <- data.frame(high_correct = rowMeans(averages_from_template[["high_correct"]][, 1:14]),
                                     high_incorrect = rowMeans(averages_from_template[["high_incorrect"]][, 1:14]),
                                     low_correct = rowMeans(averages_from_template[["low_correct"]][, 1:14]),
                                     low_incorrect = rowMeans(averages_from_template[["low_incorrect"]][, 1:14], na.rm = TRUE),
                                     high_load_diff_correct = rowMeans(averages_from_template[["high_load_correct_diff"]][, 1:14]),
                                     low_load_diff_correct = rowMeans(averages_from_template[["low_load_correct_diff"]][, 1:14]))

# (Removed a no-op line that assigned NA into cells that were already NA:
# `template_avg_over_time[is.na(template_avg_over_time)] <- NA`.)

# Attach working-memory span and subject ID, then merge in BPRS and DFR
# accuracy at high (L3) and low (L1) load.
template_avg_over_time <- data.frame(template_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI, PTID = constructs_fMRI$PTID)
template_avg_over_time <- merge(template_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")


plot_list <- list()

# For every condition column of template_avg_over_time (columns 2-7),
# draw a scatterplot with a linear fit of the time-averaged face
# probability against each of three behavioral measures. Plots are keyed
# by measure name, then condition index.
for (cond_idx in seq_len(6)) {
  plot_data <- template_avg_over_time[, c(cond_idx + 1, 8:11)]
  colnames(plot_data)[1] <- "prob"
  cond_label <- colnames(template_avg_over_time)[cond_idx + 1]
  
  plot_list[["omnibus"]][[cond_idx]] <- ggplot(data = plot_data, aes(x = prob, y = omnibus_span)) +
    geom_point() +
    stat_smooth(method = "lm") +
    xlab("Probability") +
    ggtitle(cond_label) +
    theme_classic()
  
  plot_list[["DFR_Acc"]][[cond_idx]] <- ggplot(data = plot_data, aes(x = prob, y = XDFR_MRI_ACC_L3)) +
    geom_point() +
    stat_smooth(method = "lm") +
    xlab("Probability") +
    ggtitle(cond_label) +
    theme_classic()
  
  plot_list[["BPRS"]][[cond_idx]] <- ggplot(data = plot_data, aes(x = prob, y = BPRS_TOT)) +
    geom_point() +
    stat_smooth(method = "lm") +
    xlab("Probability") +
    ggtitle(cond_label) +
    theme_classic()
}

# 2 x 2 grid: omnibus span vs time-averaged face probability for the four
# load x accuracy conditions.
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
  (plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

# Panels 5-6: the correct-minus-incorrect difference conditions.
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layouts for high-load (L3) task accuracy.
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
  (plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layouts for BPRS total score.
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
  (plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

All subjects

Over time

# Assemble one behavioral data frame per subject by joining construct
# scores, raw task data, and clinical z-scores on participant ID.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")

# Keep PTID plus five behavioral columns.
# NOTE(review): positional indexing is fragile -- these positions are
# presumably PTID, omnibus span, L3/L1 accuracy, and BPRS columns; verify
# against the merged column order and prefer selecting by name.
data_to_plot <- data_to_plot[,c(1,7,14,15,41,42)]
# Load effect: high-load minus low-load accuracy.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1

corr_to_behav_plots <- list()


# For each of the six trial-type time courses, correlate the per-subject
# classification probability at every TR with each of four behavioral
# measures (columns 2-5 of data_to_plot), then plot the four correlation
# time courses on one panel per condition.
for (cond in seq_len(6)) {
  corr_by_TR <- data.frame(matrix(nrow = 4, ncol = 14))
  
  for (measure in seq.int(2, 5)) {
    # Row (measure - 1): correlation at each of the 14 TRs.
    corr_by_TR[measure - 1, ] <- vapply(
      seq_len(14),
      function(TR) {
        cor(data_to_plot[, measure],
            averages_from_template[[cond]][, TR],
            use = "pairwise.complete.obs")
      },
      numeric(1)
    )
  }
  
  # Transpose so rows are TRs and columns are measures, then melt for ggplot.
  corr_by_TR <- data.frame(t(corr_by_TR))
  corr_by_TR$TR <- seq_len(14)
  colnames(corr_by_TR)[1:4] <- colnames(data_to_plot)[2:5]
  
  melted_corr_by_TR <- melt(corr_by_TR, id.vars = "TR")
  
  cond_name <- names(averages_from_template)[cond]
  corr_to_behav_plots[[cond_name]] <- ggplot(data = melted_corr_by_TR, aes(x = TR, y = value)) +
    geom_line(aes(color = variable)) +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(cond_name) +
    theme_classic()
}
# Correlation-by-TR time courses: four conditions in a 2 x 2 grid, then
# the two difference conditions side by side.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
  plot_layout(guides="collect")+
  plot_annotation(title = "Correlation between face classification and behavioral measures")
## Warning: Removed 56 row(s) containing missing values (geom_path).

(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
  plot_layout(guides="collect")+
  plot_annotation(title = "Correlation between face classification and behavioral measures")

plot_list <- list()

# Map each behavioral measure (top-level list key) to the column plotted
# on the x axis.
behav_x_cols <- c(omnibus = "omnibus_span_no_DFR_MRI",
                  L3_Acc  = "XDFR_MRI_ACC_L3",
                  BPRS    = "BPRS_TOT")

# Map each task period (second-level list key) to the TR column plotted
# on the y axis (V6 = cue, V8 = delay, V11 = probe).
period_y_cols <- c(cue = "V6", delay = "V8", probe = "V11")

# One scatterplot with a linear fit of face-classification probability at
# a single TR (y) against one behavioral measure (x). Using the .data
# pronoun keeps the default axis labels identical to bare-name aes().
make_behav_scatter <- function(df, y_col, x_col, title) {
  ggplot(data = df, aes(y = .data[[y_col]], x = .data[[x_col]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    ylab("Probability") +
    ggtitle(title) +
    theme_classic()
}

# Build the full measure x period x condition grid of plots. The original
# wrote out all nine ggplot calls per condition by hand; the lookup tables
# above collapse that repetition without changing any plot.
for (trial_type in seq_len(6)) {
  temp_plot_data <- merge(p200_data, averages_from_template[[trial_type]], by = "PTID")
  temp_plot_data <- merge(temp_plot_data, constructs_fMRI, by = "PTID")
  cond_name <- names(averages_from_template)[trial_type]
  
  for (measure in names(behav_x_cols)) {
    for (period in names(period_y_cols)) {
      plot_list[[measure]][[period]][[trial_type]] <-
        make_behav_scatter(temp_plot_data,
                           period_y_cols[[period]],
                           behav_x_cols[[measure]],
                           cond_name)
    }
  }
  # NOTE: temp_plot_data intentionally survives the loop (last condition's
  # merge); the cor.test calls further down rely on it.
}

Encoding (Cue Period)

# Cue-period (TR 6) probability vs omnibus span: four conditions in a
# 2 x 2 grid, then the two difference conditions.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /  
  (plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout vs high-load accuracy.
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /  
  (plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout vs BPRS total.
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /  
  (plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

Delay

# Delay-period (TR 8) probability vs omnibus span: four conditions in a
# 2 x 2 grid, then the two difference conditions.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /  
  (plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout vs high-load accuracy.
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /  
  (plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout vs BPRS total.
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /  
  (plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Follow-up test: delay-period probability on high-load incorrect trials
# vs omnibus span -- not significant (r = -.02, p = .78).
cor.test(averages_from_template[["high_incorrect"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_incorrect"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -0.27618, df = 168, p-value = 0.7828
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.1712677  0.1296273
## sample estimates:
##         cor 
## -0.02130256

Probe

There is a significant relationship between BPRS total score and the difference in face-classification probability between high-load correct and incorrect trials (r = .15, p = .048; see the final correlation test below).

# Probe-period (TR 11) probability vs omnibus span: four conditions in a
# 2 x 2 grid, then the two difference conditions.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /  
  (plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout vs high-load accuracy.
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /  
  (plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Same layout vs BPRS total.
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /  
  (plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Probe-period (TR 11) follow-up correlation tests.
# Low-load correct-minus-incorrect difference vs omnibus span: n.s.
cor.test(averages_from_template[["low_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["low_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.1842, df = 111, p-value = 0.2389
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.29043447  0.07457108
## sample estimates:
##        cor 
## -0.1116974
# High-load difference vs high-load accuracy: trend only (p = .092).
cor.test(averages_from_template[["high_load_correct_diff"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_load_correct_diff"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 1.6955, df = 168, p-value = 0.09183
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.02122201  0.27485490
## sample estimates:
##       cor 
## 0.1297065
# High-load correct trials vs BPRS: n.s.
cor.test(averages_from_template[["high_correct"]]$V11,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_correct"]]$V11 and temp_plot_data$BPRS_TOT
## t = 1.377, df = 168, p-value = 0.1703
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.04559443  0.25214963
## sample estimates:
##       cor 
## 0.1056448
# High-load incorrect trials vs BPRS: n.s.
cor.test(averages_from_template[["high_incorrect"]]$V11,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_incorrect"]]$V11 and temp_plot_data$BPRS_TOT
## t = -0.89905, df = 168, p-value = 0.3699
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.21744667  0.08217293
## sample estimates:
##         cor 
## -0.06919716
# High-load difference vs BPRS: significant (r = .15, p = .048) -- the
# one significant probe-period effect noted in the text above.
cor.test(averages_from_template[["high_load_correct_diff"]]$V11,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_load_correct_diff"]]$V11 and temp_plot_data$BPRS_TOT
## t = 1.9957, df = 168, p-value = 0.04759
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.00170123 0.29591296
## sample estimates:
##       cor 
## 0.1521765
behav_classification_corr_list <- list()

# Allocate one 3 (capacity group) x 14 (TR) correlation frame with rows
# named after the capacity groups for a given trial type. (Replaces three
# copy-pasted allocations in the original.)
make_group_corr_frame <- function(group_names) {
  out <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(out) <- group_names
  colnames(out) <- seq_len(14)
  out
}

# For each trial type, correlate the per-TR classification probability
# with each behavioral measure separately within each capacity group.
for (trial_type in seq_len(6)) {
  group_names <- names(split_template[["all_data"]][[trial_type]])[1:3]
  
  group_corrs_omnibus <- make_group_corr_frame(group_names)
  group_corrs_acc <- make_group_corr_frame(group_names)
  group_corrs_BPRS <- make_group_corr_frame(group_names)
  
  for (level in seq_len(3)) {
    split_data <- split_template[["all_data"]][[trial_type]][[level]]
    temp_subj <- split_data[order(split_data$PTID), ]
    # NOTE(review): rows of temp_subj and temp_data are assumed to align
    # by PTID -- data_to_plot comes from merge(), which sorts on the key,
    # but verify it is never reordered upstream.
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_data$PTID, ]
    
    for (TR in seq_len(14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }
  
  # Tag each row with its capacity group once, after all TR columns are
  # filled. (The original re-assigned these on every level iteration and
  # labeled all three frames from group_corrs_acc's rownames; the names
  # are identical, but each frame now uses its own.)
  group_corrs_omnibus$level <- factor(rownames(group_corrs_omnibus))
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_BPRS))
  
  trial_name <- names(split_template[["all_data"]])[trial_type]
  behav_classification_corr_list[["omnibus"]][[trial_name]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[trial_name]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[trial_name]] <- group_corrs_acc
}

By Working Memory Capacity

behav_classification_corr_melt <- list()
behav_split_plot_list <- list()

# Melt each group-level correlation frame to long format (TR as a numeric
# "variable" column, capacity group ordered high/med/low) and draw one
# correlation-by-TR line plot per measure and trial type.
for (measure in seq_len(3)) {
  measure_name <- names(behav_classification_corr_list)[measure]
  
  for (trial_type in seq_len(6)) {
    trial_name <- names(behav_classification_corr_list[[measure]])[trial_type]
    
    melted <- melt(behav_classification_corr_list[[measure]][[trial_type]], id.vars = "level")
    melted$variable <- as.numeric(as.character(melted$variable))
    melted$level <- factor(melted$level, levels = c("high", "med", "low"))
    behav_classification_corr_melt[[measure_name]][[trial_name]] <- melted
    
    behav_split_plot_list[[measure_name]][[trial_name]] <-
      ggplot(data = melted, aes(x = variable, y = value)) +
      geom_line(aes(color = level)) +
      scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
      ggtitle(trial_name) +
      xlab("TR") +
      ylab("Correlation") +
      theme_classic()
  }
}
# Omnibus span: group-split correlation time courses, four conditions in
# a 2 x 2 grid, then the two difference conditions.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) / 
  (behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).

(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) + 
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

# Same layout for high-load (L3) accuracy.
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) / 
  (behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).

(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) + 
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

# Same layout for BPRS total.
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) / 
  (behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).

# BPRS difference conditions. Title fixed: restored the word
# "Correlation" so it matches the other panel titles in this section.
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) + 
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")